## Loading required package: knitr

require(ggplot2)
require(lattice) # nicer scatter plots
require(plyr)
require(grid) # contains the arrow function
require(biOps)
require(doMC) # for parallel code
require(EBImage)
## To install EBImage
# source("http://bioconductor.org/biocLite.R")
# biocLite("EBImage")

# start parallel environment
registerDoMC()
# functions for converting images back and forth
# Convert a 2D image matrix into a long-format data frame with one
# row per pixel: x (row index), y (column index) and val (intensity).
im.to.df<-function(in.img) {
  # expand.grid varies x fastest, matching as.vector's column-major
  # flattening of the matrix, so each value lands on its coordinate
  pixel.grid<-expand.grid(x=1:nrow(in.img),y=1:ncol(in.img))
  pixel.grid$val<-as.vector(in.img)
  pixel.grid
  }
# Convert a long-format pixel data frame (as produced by im.to.df)
# back into a 2D grey-scale image matrix.
#
# in.df   : data frame with an x column and a value column
# val.col : name of the column holding the pixel values
# inv     : if TRUE, invert intensities (255 - value)
df.to.im<-function(in.df,val.col="val",inv=FALSE) {
  in.vals<-in.df[[val.col]]
  # logical masks are promoted to 0/255 grey values; is.logical() on
  # the whole vector replaces the fragile class(in.vals[1]) comparison
  if(is.logical(in.vals)) in.vals<-as.integer(in.vals)*255
  if(inv) in.vals<-255-in.vals
  # one matrix row per unique x coordinate; column-wise fill undoes
  # the column-major flattening performed by im.to.df
  out.mat<-matrix(in.vals,nrow=length(unique(in.df$x)),byrow=FALSE)
  attr(out.mat,"type")<-"grey"
  out.mat
  }
# Wrap a ddply call whose first `cols` grouping columns were produced
# by cut(): run ddply normally, then replace each interval label like
# "(0,5]" with its numeric midpoint and strip the cut() wrapper from
# the corresponding column name.
ddply.cutcols<-function(...,cols=1) {
  # run standard ddply command
  cur.table<-ddply(...)
  # turn interval labels "(a,b]" into midpoints (a+b)/2
  cutlabel.fixer<-function(oVal) {
    sapply(oVal,function(x) {
      cnv<-as.character(x)
      mean(as.numeric(strsplit(substr(cnv,2,nchar(cnv)-1),",")[[1]]))
      })
    }
  # recover the variable name from a column name like "cut(x,breaks)"
  cutname.fixer<-function(c.str) {
    s.str<-strsplit(c.str,"(",fixed=TRUE)[[1]]
    t.str<-strsplit(paste(s.str[c(2:length(s.str))],collapse="("),",")[[1]]
    # drop the trailing breaks argument; seq_len replaces the original
    # "1:length(t.str)-1" which relied on 0-indexing being ignored
    paste(t.str[seq_len(length(t.str)-1)],collapse=",")
    }
  for(i in seq_len(cols)) {
    cur.table[,i]<-cutlabel.fixer(cur.table[,i])
    names(cur.table)[i]<-cutname.fixer(names(cur.table)[i])
    }
  cur.table
  }

colMeans.df<-function(x,...) as.data.frame(t(colMeans(x,...)))

Quantitative Big Imaging

author: Kevin Mader date: 20 March 2014 width: 1440 height: 900 css: ../template.css transition: rotate

ETHZ: 227-0966-00L

Analysis of Single Objects

Course Outline

Literature / Useful References

Previously on QBI ...

Learning Objectives

Motivation (Why and How?)

Outline


Motivation

We have dramatically simplified our data, but there is still too much.

2560 x 2560 x 2160 x 32 bit

56GB / sample


\[\downarrow\]

2560 x 2560 x 2160 x 1 bit

(1.75GB / sample)

What did we want in the first place

Single number:

From the labels

# Load the cell colony image, threshold at 0.6 and label connected components
cell.im<-jpeg::readJPEG("ext-figures/Cell_Colony.jpg")
cell.lab.df<-im.to.df(bwlabel(cell.im<.6))
# count pixels per label to build a size histogram
size.histogram<-ddply(subset(cell.lab.df,val>0),.(val),function(c.label) data.frame(count=nrow(c.label)))
# keep only components larger than 25 pixels; pick the first as the example
keep.vals<-subset(size.histogram,count>25)
cur.cell.id<-subset(cell.lab.df,val %in% keep.vals$val)$val[1]
cur.cell.df<-subset(cell.lab.df,val==cur.cell.id)
# show all large cells, highlighting the selected one in red
ggplot(subset(cell.lab.df,val %in% keep.vals$val),aes(x=x,y=y,fill=as.numeric(as.factor(val))))+
  geom_raster()+
  geom_tile(data=cur.cell.df,fill="red",alpha=0.5)+
  labs(fill="Label",title="Larger Cells (>25px)")+
  theme_bw(20)+coord_equal()
Cell Colony


What we would like to do

COV: With a single object

\[ I_{id}(x,y) = \begin{cases} 1, & L(x,y) = id \\ 0, & \text{otherwise} \end{cases}\]

# Mark the center of volume (unweighted centroid) of the single cell
mean.df<-colMeans.df(cur.cell.df[,c("x","y")])
ggplot(cur.cell.df,aes(x=x,y=y))+
  geom_tile(color="black",fill="grey")+
  geom_point(data=mean.df,color="red",pch=3,size=20)+
  labs(title="Single Cell")+
  theme_bw(20)+coord_equal()+guides(fill=F)
Single Cell


Define a center

\[ \bar{x} = \frac{1}{N} \sum_{\vec{v}\in I_{id}} \vec{v}\cdot\vec{i} \] \[ \bar{y} = \frac{1}{N} \sum_{\vec{v}\in I_{id}} \vec{v}\cdot\vec{j} \] \[ \bar{z} = \frac{1}{N} \sum_{\vec{v}\in I_{id}} \vec{v}\cdot\vec{k} \]

COM: With a single object

If the gray values are kept (or other meaningful ones are used), this can be seen as a weighted center of volume or center of mass (using \(I_{gy}\) to distinguish it from the labels)

# For every labeled object compute both the unweighted centroid
# (center of volume: xv, yv) and the weight-averaged centroid
# (center of mass: xm, ym) from the per-pixel `weight` column.
commean.fun<-function(in.df) {
  per.object<-function(obj.px) {
    total.weight<-sum(obj.px$weight)
    data.frame(xv=mean(obj.px$x),
               yv=mean(obj.px$y),
               xm=sum(obj.px$x*obj.px$weight)/total.weight,
               ym=sum(obj.px$y*obj.px$weight)/total.weight)
    }
  ddply(in.df,.(val),per.object)
  }
# Attach a synthetic weight (squared distance from an off-center point)
# to each pixel and compare center of volume (COV) vs center of mass (COM)
cur.cell.df.weight<-cbind(cur.cell.df,weight=with(cur.cell.df,(x-17)^2+(y-9)^2))
commean.df<-commean.fun(cur.cell.df.weight)
ggplot(cur.cell.df.weight,aes(x=x,y=y))+
  geom_tile(aes(fill=weight),color="black")+
  geom_point(data=commean.df,aes(x=xv,y=yv,color="COV"),pch=16,size=20)+
  geom_point(data=commean.df,aes(x=xm,y=ym,color="COM"),pch=16,size=20)+
  labs(title="Single Cell",color="Center",fill="Igy")+
  theme_bw(20)+coord_equal()#+guides(fill=T)
Single Cell


Define a center

\[ \Sigma I_{gy} = \frac{1}{N} \sum_{\vec{v}\in I_{id}} I_{gy}(\vec{v}) \] \[ \bar{x} = \frac{1}{\Sigma I_{gy}} \sum_{\vec{v}\in I_{id}} (\vec{v}\cdot\vec{i}) I_{gy}(\vec{v}) \] \[ \bar{y} = \frac{1}{\Sigma I_{gy}} \sum_{\vec{v}\in I_{id}} (\vec{v}\cdot\vec{j}) I_{gy}(\vec{v}) \] \[ \bar{z} = \frac{1}{\Sigma I_{gy}} \sum_{\vec{v}\in I_{id}} (\vec{v}\cdot\vec{k}) I_{gy}(\vec{v}) \]

Extents: With a single object

Extents or caliper lengths are the size of the object in a given direction. Since the coordinates of our image are \(x\) and \(y\), the extents are calculated in these directions

# The pixel edge lies half a pixel beyond the pixel center, so the
# true extent of an object reaches 0.5 past the outermost centers.
emin<-function(...) {
  min(...)-0.5
  }
emax<-function(...) {
  max(...)+0.5
  }
# Per-object extent cross: for each label build two segments through
# the centroid, one spanning the x extents and one the y extents
# (measured to pixel edges, i.e. +/-0.5 beyond the outermost centers).
extents.fun<-function(in.df) {
  ddply(in.df,.(val), function(c.cell) {
    ctr<-colMeans.df(c.cell[,c("x","y")])
    # row 1: vertical segment at the centroid x; row 2: horizontal
    # segment at the centroid y
    seg.ends<-data.frame(xmin=c(ctr$x,emin(c.cell$x)),
                         xmax=c(ctr$x,emax(c.cell$x)),
                         ymin=c(emin(c.cell$y),ctr$y),
                         ymax=c(emax(c.cell$y),ctr$y))
    cbind(ctr,seg.ends)
    })
  }
# Draw the extent cross (x and y caliper lengths) over the single cell
cell.extents<-extents.fun(cur.cell.df)
ggplot(cur.cell.df,aes(x=x,y=y))+
  geom_tile(color="black",fill="grey")+
  geom_segment(data=cell.extents,aes(x=xmin,y=ymin, xend=xmax,yend=ymax),color="red",size=2,lineend="square")+
  labs(title="Single Cell")+
  theme_bw(20)+coord_equal()+guides(fill=F)
Single Cell


Define extents as the minimum and maximum values along the projection of the shape in each direction \[ \text{Ext}_x = \left\{ \forall \vec{v}\in I_{id}: max(\vec{v}\cdot\vec{i})-min(\vec{v}\cdot\vec{i}) \right\} \] \[ \text{Ext}_y = \left\{ \forall \vec{v}\in I_{id}: max(\vec{v}\cdot\vec{j})-min(\vec{v}\cdot\vec{j}) \right\} \] \[ \text{Ext}_z = \left\{ \forall \vec{v}\in I_{id}: max(\vec{v}\cdot\vec{k})-min(\vec{v}\cdot\vec{k}) \right\} \]

Anisotropy: What is it?

By definition (New Oxford American): varying in magnitude according to the direction of measurement.


Due to its very vague definition, it can be mathematically characterized in many different very much unequal ways (in all cases 0 represents a sphere)

\[ Aiso1 = \frac{\text{Longest Side}}{\text{Shortest Side}} - 1 \]

\[ Aiso2 = \frac{\text{Longest Side}-\text{Shortest Side}}{\text{Longest Side}} \]

\[ Aiso3 = \frac{\text{Longest Side}}{\text{Average Side Length}} - 1 \]

\[ Aiso4 = \frac{\text{Longest Side}-\text{Shortest Side}}{\text{Average Side Length}} \]

\[ \cdots \rightarrow \text{ ad nauseam} \]

Anisotropy: Which definition makes sense?

Let's take some sample objects

# test function for ellipse generation
# ggplot(ldply(seq(-pi,pi,length.out=100),function(th) create.ellipse.points(a=1,b=2,th.off=th,th.val=th)),aes(x=x,y=y))+geom_path()+facet_wrap(~th.val)+coord_equal()
# Generate pts points tracing an ellipse with semiaxes a and b,
# rotated by th.off and centered at (x.off, y.off).  Extra named
# arguments (...) become additional constant columns, and id encodes
# the generating parameters so several ellipses can share one frame.
create.ellipse.points<-function(x.off=0,y.off=0,a=1,b=NULL,th.off=0,th.max=2*pi,pts=36,...) {
  if (is.null(b)) b<-a # default to a circle
  th<-seq(0,th.max,length.out=pts)
  # hoist the rotation terms out of the coordinate expressions
  cos.o<-cos(th.off)
  sin.o<-sin(th.off)
  data.frame(x=a*cos.o*cos(th)+b*sin.o*sin(th)+x.off,
             y=-1*a*sin.o*cos(th)+b*cos.o*sin(th)+y.off,
             id=as.factor(paste(x.off,y.off,a,b,th.off,pts,sep=":")),...)
  }
# Draw one test object: translate the first row of a parameter box
# (x, y, a, b, th[.off], col columns) into ellipse outline points.
# NOTE: c.box$th partially matches the th.off column, as in the
# original.
deform.ellipse.draw<-function(c.box) {
  first.row<-c.box[1,]
  create.ellipse.points(x.off=first.row$x,
                        y.off=first.row$y,
                        a=first.row$a,
                        b=first.row$b,
                        th.off=first.row$th,
                        col=first.row$col)
  }
# Build a family of test ellipses with fixed a=5 and shrinking b,
# then draw them stacked for visual comparison
test.objs<-data.frame(x=0,y=0,th.off=0,a=5,b=rev(c(1e-3,1e-2,1e-1,1,2,3,4,5)))
test.objs$col<-1:nrow(test.objs)
test.objs$x.extents<-2*test.objs$a
test.objs$y.extents<-2*test.objs$b
ggplot(ddply(test.objs,.(col),deform.ellipse.draw),
       aes(x=x,y=y,group=as.factor(id)))+
  geom_polygon(color="black",fill="blue")+coord_equal()+
  facet_grid(col~.)+theme_bw(15)
Sample Objects


# Four candidate anisotropy definitions; each is 0 for a circle/sphere.
# Aiso1: longest/shortest - 1            (unbounded above)
s.aiso<-function(lside,sside) {
  lside/sside-1
  }
# Aiso2: (longest-shortest)/longest      (bounded in [0,1])
aiso<-function(lside,sside) {
  (lside-sside)/lside
  }
# Aiso3: longest/mean - 1                (bounded in [0,1))
a.aiso<-function(lside,sside) {
  2*lside/(lside+sside)-1
  }
# Aiso4: (longest-shortest)/mean         (bounded in [0,2))
as.aiso<-function(lside,sside) {
  2*(lside-sside)/(lside+sside)
  }
awrap<-function(c.fun) function(in.ell.dim) c.fun(pmax(2*in.ell.dim$a,2*in.ell.dim$b),pmin(2*in.ell.dim$a,2*in.ell.dim$b))
# Tabulate all four anisotropy metrics for each ellipse, grouped by
# the b semiaxis (one output row per distinct b value).
aiso.table.fn<-function(in.objs) {
  metric.row<-function(c.ell) {
    data.frame(Aiso1=awrap(s.aiso)(c.ell),
               Aiso2=awrap(aiso)(c.ell),
               Aiso3=awrap(a.aiso)(c.ell),
               Aiso4=awrap(as.aiso)(c.ell))
    }
  ddply(in.objs,.(b),metric.row)
  }
# Tabulate the four anisotropy metrics for the test ellipses
aiso.table<-aiso.table.fn(test.objs)
names(aiso.table)[1]<-"Y Extent"
kable(aiso.table,digits=2)
Y Extent Aiso1 Aiso2 Aiso3 Aiso4
0.00 4999.00 1.00 1.00 2.00
0.01 499.00 1.00 1.00 1.99
0.10 49.00 0.98 0.96 1.92
1.00 4.00 0.80 0.67 1.33
2.00 1.50 0.60 0.43 0.86
3.00 0.67 0.40 0.25 0.50
4.00 0.25 0.20 0.11 0.22
5.00 0.00 0.00 0.00 0.00

Anisotropy: Which definition makes sense?

# Re-draw the test ellipses in a wrapped grid layout
ggplot(ddply(test.objs,.(col),deform.ellipse.draw),
       aes(x=x,y=y,group=as.factor(id)))+
  geom_polygon(color="black",fill="blue")+coord_equal()+
  facet_wrap(~col)+theme_bw(15)
Sample Objects


Anisotropy vs \(x\) extents

# Plot each anisotropy metric against the y extent (log-scaled y axis)
names(aiso.table)[1]<-"y.extent"
ggplot(aiso.table,aes(x=y.extent))+
  geom_line(aes(y=Aiso1,color="Aiso1"))+
  geom_line(aes(y=Aiso2,color="Aiso2"))+
  geom_line(aes(y=Aiso3,color="Aiso3"))+
  geom_line(aes(y=Aiso4,color="Aiso4"))+
  scale_y_log10()+
  labs(x="Y extents",y="Anisotropy",color="Metric")+
  theme_bw(20)
Anisotropy Formulas

Distributions: Randomly Sized Objects

Objects with uniformly distributed, independent \(x\) and \(y\) extents

# Generate 5000 ellipses with uniformly random, independent semiaxes
# and draw the first 15 of them
m.count<-5000
many.objs<-data.frame(x=0,y=0,th.off=0,a=runif(m.count,0,20),b=runif(m.count,0,20))
many.objs$col<-1:nrow(many.objs)
many.objs$x.extents<-2*many.objs$a
many.objs$y.extents<-2*many.objs$b
maiso.table<-aiso.table.fn(many.objs)
ggplot(ddply(subset(many.objs,col<16),.(col),deform.ellipse.draw),
       aes(x=x,y=y,group=as.factor(id)))+
  geom_polygon(color="black",fill="blue")+coord_equal()+
  facet_wrap(~col,ncol=5)+theme_bw(15)
Sample Objects

# Density of x and y extent lengths over the random ellipses
ggplot(many.objs)+
  geom_density(aes(x=x.extents,color="X"),size=2)+
  geom_density(aes(x=y.extents,color="Y"),size=2)+
  #scale_x_log10()+
  labs(x="Extent Length (pixels)",color="Axis")+
  guides(fill=F)+
  xlim(0,80)+
  theme_bw(20)
Anisotropy Formulas


splom(subset(maiso.table,Aiso1<10)[,c("Aiso1","Aiso2","Aiso3","Aiso4")],pch=16)
Anisotropy Formulas

# Density of each anisotropy metric over the random ellipses
ggplot(maiso.table)+
  geom_density(aes(x=Aiso1,color="Aiso1"),size=2)+
  geom_density(aes(x=Aiso2,color="Aiso2"),fill="green",alpha=0.5)+
  geom_density(aes(x=Aiso3,color="Aiso3"),size=2)+
  geom_density(aes(x=Aiso4,color="Aiso4"),size=2)+
  #scale_x_log10()+
  labs(x="Anisotropy",color="Metric")+
  xlim(0,2)+guides(fill=F)+
  theme_bw(20)
Anisotropy Formulas

Extents: With all objects

# Show all kept cells with the example cell highlighted in red
ggplot(subset(cell.lab.df,val %in% keep.vals$val),aes(x=x,y=y,
                                                      fill=as.numeric(as.factor(val))))+
  geom_raster()+
  geom_tile(data=cur.cell.df,fill="red",alpha=0.5)+
  labs(fill="Label",title="Labeled Cells")+
  theme_bw(20)#+coord_equal()
All Cells Labeled


# Extent crosses and centroids for every kept cell
cell.extents.all<-extents.fun(subset(cell.lab.df,val %in% keep.vals$val))
ggplot(cell.extents.all,aes(x=x,y=y))+
  geom_segment(aes(x=xmin,y=ymin, xend=xmax,yend=ymax),color="red",lineend="square")+
  geom_point(aes(color=as.numeric(as.factor(val))),size=2)+
  labs(title="COM and Extents",color="Label")+
  theme_bw(20)#+coord_equal()
All Cells

COV and Extents: All Cells

# Center each labeled object on its own centroid so all objects can
# be overlaid/faceted around (0, 0).
mean.pos.subfun<-function(in.df) {
  ddply(in.df,.(val),function(obj.px) {
    ctr<-colMeans.df(obj.px)
    data.frame(x=obj.px$x-ctr$x,y=obj.px$y-ctr$y)
    })
  }
# Center each cell on its centroid and facet per label with its extents
mean.sub.df<-mean.pos.subfun(subset(cell.lab.df,val %in% keep.vals$val))

ggplot(cell.extents.all,aes(x=x-x,y=y-y))+
  geom_raster(data=mean.sub.df,aes(x=x,y=y))+
  geom_segment(aes(x=xmin-x,y=ymin-y, xend=xmax-x,yend=ymax-y),
               color="red")+
  geom_point(color="red",size=2)+
  facet_wrap(~val,ncol=14)+
  labs(x="x - COM",y="y - COM")+
  theme_bw(15)+coord_equal()
All Cells

Bounding Box: All Cells

# Axis-aligned bounding box per labeled object: returns the centroid
# plus five (xi, yi) corner points tracing the closed box outline and
# the box width (xw) and height (yw) measured between pixel edges.
bbox.fun<-function(in.df) {
  ddply(in.df,.(val), function(c.cell) {
    ctr<-colMeans.df(c.cell[,c("x","y")])
    x.lo<-emin(c.cell$x)
    x.hi<-emax(c.cell$x)
    y.lo<-emin(c.cell$y)
    y.hi<-emax(c.cell$y)
    # five points so geom_path closes the rectangle
    corner.path<-data.frame(xi=c(x.lo,x.lo,x.hi,x.hi,x.lo),
                            yi=c(y.lo,y.hi,y.hi,y.lo,y.lo),
                            xw=x.hi-x.lo,
                            yw=y.hi-y.lo)
    cbind(ctr,corner.path)
    })
  }
# Bounding boxes for all kept cells, drawn around the centered pixels
cell.bbox.all<-bbox.fun(subset(cell.lab.df,val %in% keep.vals$val))
ggplot(cell.bbox.all,aes(x=x-x,y=y-y))+
  geom_raster(data=mean.sub.df,aes(x=x,y=y))+
  geom_path(aes(x=xi-x,y=yi-y),
            color="red")+
  geom_point(color="red",size=2)+
  facet_wrap(~val,ncol=14)+
  labs(x="x - COM",y="y - COM")+
  theme_bw(15)+coord_equal()
All Cells Bounding Box

Sorted by Anisotropy

# Bounding-box anisotropy per cell; the tiny val/1e5 term keeps facet
# keys unique so cells with equal anisotropy still get separate panels
cell.bbox.all$aiso<-with(cell.bbox.all,(pmax(xw,yw)-pmin(xw,yw))/pmax(xw,yw)+val/1e5)

ggplot(cell.bbox.all,aes(x=x-x,y=y-y))+
  #geom_raster(data=mean.sub.df,aes(x=x,y=y))+
  geom_path(aes(x=xi-x,y=yi-y,group=val),
            color="red")+
  geom_point(color="red",size=2)+
  facet_wrap(~aiso,ncol=14)+
  labs(x="x - COM",y="y - COM")+
  theme_bw(15)+coord_equal()
All Cells Bounding Box

Bounding Box is a Poor Approximation

While easy to calculate, the bounding box / extents approach is a very rough approximation for most of the objects in our image. In particular objects which are not parallel to the \(XY\)-axes are misrepresented.

Possible Solutions

Useful Statistical Tools: Principal Component Analysis

While many of the topics covered in Linear Algebra and Statistics courses might not seem very applicable to real problems at first glance, at least a few of them come in handy for dealing with distributions of pixels (they will only be briefly covered; for a more detailed review look at some of the suggested material)

Principal Component Analysis

Similar to K-Means insofar as we start with a series of points in a vector space and want to condense the information. With PCA instead of searching for distinct groups, we try to find a linear combination of components which best explain the variance in the system.


As an example we will use a very simple example of corn and chicken prices vs time

# Simulate correlated corn and chicken prices over time (chicken price
# is corn price plus independent noise)
n.pts<-20
test.pca<-data.frame(time=c(1:n.pts),
                     corn.price=0.01*runif(n.pts)+0.05)
test.pca$chicken.price<-with(test.pca,corn.price+0.01*runif(n.pts))
ggplot(test.pca,aes(x=time))+
  geom_line(aes(y=corn.price,color="Corn Price ($/kg)"))+
  geom_line(aes(y=chicken.price,color="Chicken Price ($/g)"))+
  labs(x="Time (days)",y="Price",color="Commodity")+
  theme_bw(20)
Corn and Chicken

# PCA of the two price series (time column dropped); overlay the
# component scores on the original prices
test.pca.princomp<-princomp(test.pca[,-1])
test.pca$pca1<-test.pca.princomp$scores[,1]
test.pca$pca2<-test.pca.princomp$scores[,2]
ggplot(test.pca,aes(x=time))+
  geom_line(aes(y=pca1,color="Principal Component #1"))+
  geom_line(aes(y=pca2,color="Principal Component #2"))+
  geom_line(aes(y=corn.price,color="Corn Price ($/kg)"))+
  geom_line(aes(y=chicken.price,color="Chicken Price ($/g)"))+
  labs(x="Time (days)",y="Price",color="Commodity")+
  theme_bw(20)
PCA Example

Useful Statistical Tools: Principal Component Analysis

The first principal component condenses the correlated information in both the chicken and corn prices (perhaps the underlying cost of fuel) since it explains the most variance in the final table of corn and chicken prices.

# Both prices against principal component 1 (the shared trend)
ggplot(test.pca,aes(x=pca1))+
  geom_point(aes(y=corn.price,color="Corn Price ($/kg)"))+
  geom_point(aes(y=chicken.price,color="Chicken Price ($/g)"))+
  labs(x="Principal Component #1",y="Price",color="Commodity")+
  theme_bw(20)
PCA Example


The second principal component is then related to the unique information separating chicken from corn prices, but not to either price directly (maybe the cost of antibiotics)

# Both prices and the scaled chicken-corn difference against PC2
ggplot(test.pca,aes(x=pca2))+
  geom_point(aes(y=corn.price,color="Corn Price ($/kg)"))+
  geom_point(aes(y=chicken.price,color="Chicken Price ($/g)"))+
  geom_point(aes(y=5*(chicken.price-corn.price),color="Difference in Chicken-Corn"))+
  labs(x="Principal Component #2",y="Price",color="Commodity")+
  theme_bw(20)
PCA Example

Applied PCA: Shape Tensor

How do these statistical analyses help us?

Going back to a single cell, we have the a distribution of \(x\) and \(y\) values.

A principal component analysis of the voxel positions, will calculate two new principal components (the components themselves are the relationships between the input variables and the scores are the final values.)


# Shape-tensor PCA for each labeled object: eigendecompose the
# covariance matrix of the pixel (x, y) positions.  Output per label
# (one row per principal component): centroid (x, y), eigenvector
# components (vx, vy), score vw = sqrt(eigenvalue), and orientation
# th.off of each component.
pca.fun<-function(in.df) {
  ddply(in.df,.(val), function(c.cell) {
    eig<-eigen(cov(c.cell[,c("x","y")]))
    ctr<-colMeans.df(c.cell[,c("x","y")])
    # rows of $vectors give the x/y components of both eigenvectors
    cbind(ctr,
          data.frame(vx=eig$vectors[1,],
                     vy=eig$vectors[2,],
                     vw=sqrt(eig$values),
                     th.off=atan2(eig$vectors[2,],eig$vectors[1,])))
    })
  }

# Pick a sample cell (label 58 overrides the positional pick) and draw
# its principal components as arrows scaled by their scores
cur.cell.id<-subset(cell.lab.df,val %in% keep.vals$val)$val[100]
cur.cell.id<-58
cur.cell.df<-subset(cell.lab.df,val==cur.cell.id)

cell.pca<-pca.fun(cur.cell.df)
ggplot(cur.cell.df,aes(x=x,y=y))+
  geom_tile(color="black",fill="grey")+
  geom_segment(data=cell.pca,aes(x=x,y=y,xend=x+vx*vw,yend=y+vy*vw,
                                 color=as.factor(round(vw*10)/10)),
               arrow=arrow(length = unit(0.3,"cm")),size=2)+
  labs(title="Single Cell",color="Score /\nEigenvalue")+
  theme_bw(20)+coord_equal()+guides(fill=F)
Single Cell

How did we calculate that?

We start off by calculating the covariance matrix from the list of \(x\), \(y\), and \(z\) points that make up our object of interest.

\[ COV(I_{id}) = \frac{1}{N} \sum_{\forall\vec{v}\in I_{id}} \begin{bmatrix} \vec{v}_x\vec{v}_x & \vec{v}_x\vec{v}_y & \vec{v}_x\vec{v}_z\\ \vec{v}_y\vec{v}_x & \vec{v}_y\vec{v}_y & \vec{v}_y\vec{v}_z\\ \vec{v}_z\vec{v}_x & \vec{v}_z\vec{v}_y & \vec{v}_z\vec{v}_z \end{bmatrix} \]

We then take the eigentransform of this array to obtain the eigenvectors (principal components, \(\vec{\Lambda}_{1\cdots 3}\)) and eigenvalues (scores, \(\lambda_{1\cdots 3}\))

\[ COV(I_{id}) \rightarrow \begin{bmatrix} \vec{\Lambda}_{1x} & \vec{\Lambda}_{1y} & \vec{\Lambda}_{1z} \\ \vec{\Lambda}_{2x} & \vec{\Lambda}_{2y} & \vec{\Lambda}_{2z} \\ \vec{\Lambda}_{3x} & \vec{\Lambda}_{3y} & \vec{\Lambda}_{3z} \end{bmatrix} * \begin{bmatrix} \lambda_1 \\ \lambda_2 \\ \lambda_3 \end{bmatrix} \] The principal components tell us about the orientation of the object and the scores tell us about the corresponding magnitude (or length) in that direction.

Principal Component Analysis: Take home message

Principal Component Analysis: Elliptical Model

While the eigenvalues and eigenvectors are in their own right useful

Ellipsoidal Representation

  1. Center of Volume is calculated normally
  2. Eigenvectors represent the unit vectors for the semiaxes of the ellipsoid
  3. \(\sqrt{\text{Eigenvalues}}\) is proportional to the length of the semiaxis (\(\mathcal{l}=\sqrt{5\lambda_i}\)), derivation similar to moment of inertia tensor for ellipsoids.

# Convert the per-object PCA rows into ellipse outline points.
# Semiaxis length l = sqrt(5 * eigenvalue), analogous to the moment
# of inertia of an ellipsoid (see slide text).
vec.to.ellipse<-function(pca.df) {
  ddply(pca.df,.(val),function(cur.pca) {
    # assume there are two vectors now
    # row 1 = first principal component (eigen() returns decreasing
    # eigenvalues) -> mapped to semiaxis b, row 2 -> semiaxis a;
    # orientation comes from the first eigenvector, rotated by pi/2
    # to match create.ellipse.points' convention.  x.cent/y.cent are
    # carried along so callers can re-center the outline.
    create.ellipse.points(x.off=cur.pca[1,"x"],y.off=cur.pca[1,"y"],
                          b=sqrt(5)*cur.pca[1,"vw"],a=sqrt(5)*cur.pca[2,"vw"],
                          th.off=pi/2-atan2(cur.pca[1,"vy"],cur.pca[1,"vx"]),
                          x.cent=cur.pca[1,"x"],y.cent=cur.pca[1,"y"])
    })
  }

# Overlay the fitted ellipse on the principal-component arrows
cell.ellipse<-vec.to.ellipse(cell.pca)
ggplot(cur.cell.df,aes(x=x,y=y))+
  geom_tile(color="black",fill="grey")+
  geom_segment(data=cell.pca,aes(x=x,y=y,xend=x+vx*vw,yend=y+vy*vw,
                                 color=as.factor(round(vw*10)/10)),
               arrow=arrow(length = unit(0.3,"cm")),size=2)+
  geom_path(data=cell.ellipse)+
  labs(title="Single Cell",color="Score /\nEigenvalue")+
  theme_bw(20)+coord_equal()+guides(fill=F)
Single Cell

Elliptical Model vs Bounding Box

# Compare the bounding box and the elliptical model on the single cell
ggplot(cur.cell.df,aes(x=x,y=y))+
  geom_tile(color="black",fill="grey")+
  geom_path(data=bbox.fun(cur.cell.df),aes(x=xi,y=yi,color="Bounding\nBox"))+
  geom_path(data=cell.ellipse,aes(color="Ellipse"))+
  labs(title="Single Cell",color="Shape\nAnalysis\nMethod")+
  theme_bw(20)+coord_equal()+guides(fill=F)
Single Cell


Elliptical Model for All Samples

# Fit the elliptical model to every kept cell and draw it, centered,
# with the principal-component arrows
cell.pca.all<-pca.fun(subset(cell.lab.df,val %in% keep.vals$val))
cell.ell.all<-vec.to.ellipse(cell.pca.all)
ggplot(cell.bbox.all,aes(x=x-x,y=y-y))+
  geom_raster(data=mean.sub.df,aes(x=x,y=y))+
  #geom_path(aes(x=xi-x,y=yi-y),color="red")+
  geom_path(data=cell.ell.all,aes(x=x-x.cent,y=y-y.cent),color="red")+
  geom_point(color="red",size=2)+
  geom_segment(data=cell.pca.all,aes(xend=vx*vw,yend=vy*vw),
               arrow=arrow(length = unit(0.3,"cm")),color="red")+
  facet_wrap(~val,ncol=14)+
  labs(x="x - COM",y="y - COM")+
  theme_bw(15)+coord_equal()
All Cells Elliptical Model

Comparison between Anisotropy for both models

# Elliptical anisotropy per cell from the PCA scores, merged with the
# bounding-box anisotropy and compared in a scatter plot; cells where
# the box reports ~0 anisotropy are flagged for closer inspection
pca.aiso.all<-ddply(cell.pca.all,.(val),function(cur.pca) {
  lon.ax<-max(cur.pca$vw)
  sho.ax<-min(cur.pca$vw)
  data.frame(e.aiso=(lon.ax-sho.ax)/lon.ax)
  })
both.aiso<-merge(pca.aiso.all,cell.bbox.all,by="val")
no.aiso.vals<-subset(both.aiso,aiso<0.01 & e.aiso>0.2)$val
ggplot(both.aiso,aes(x=aiso,y=e.aiso))+
  geom_point(aes(color=ifelse(aiso<0.01,"lost information","normal")))+geom_smooth()+
  labs(x="Extents / BB Anisotropy",y="Elliptical Anisotropy",color="Points")+
  coord_equal()+theme_bw(20)
All Cells Elliptical Model


We see that there seems to be a general, albeit weak, correlation between the two measures. The most concerning portion is however the left side, where the extents or bounding box method reports 0 anisotropy and the elliptical method reports substantial amounts of it.

# Inspect the cells where the bounding box reports ~0 anisotropy but
# the elliptical model does not (diagonal shapes the box cannot see)
ggplot(subset(cell.bbox.all,val %in% no.aiso.vals),aes(x=x-x,y=y-y))+
  geom_raster(data=subset(mean.sub.df,val %in% no.aiso.vals),aes(x=x,y=y))+
  geom_path(aes(x=xi-x,y=yi-y,color="BB"))+
  geom_path(data=subset(cell.ell.all,val %in% no.aiso.vals),
            aes(x=x-x.cent,y=y-y.cent,color="Ellipse"))+
  geom_point(color="red",size=2)+
  geom_segment(data=subset(cell.pca.all,val %in% no.aiso.vals),
               aes(xend=vx*vw,yend=vy*vw),
               arrow=arrow(length = unit(0.3,"cm")),color="red")+
  facet_wrap(~val)+
  labs(x="x - COM",y="y - COM")+
  theme_bw(15)+coord_equal()
All Cells Elliptical Model

3D Shape Analysis

The models we have done are all applicable to both 2D and 3D images. The primary difference is when looking at 3D images there is an extra dimension to consider.

xg<-seq(-10,10,length.out=41)
# Boolean voxel image of an axis-aligned ellipsoid with semiaxes
# (a, b, c), sampled on the grid xg in all three directions; val is
# TRUE for voxels inside the ellipsoid.
make.ellipse.3d<-function(a,b,c,xg) {
  vox<-expand.grid(x=xg,y=xg,z=xg)
  vox$val<-(vox$x/a)^2+(vox$y/b)^2+(vox$z/c)^2 < 1
  vox
  }
cloud(z~x*y,data=subset(make.ellipse.3d(9,3,10,xg),val),xlim=c(-10,10),ylim=c(-10,10),zlim=c(-10,10))
3D Ellipse


It can also be shown as a series of 2D slices.

# The same ellipsoid shown as a series of 2D slices faceted along z
ggplot(subset(make.ellipse.3d(9,3,10,xg),val),aes(x=x,y=y))+
  geom_tile(color="blue")+
  facet_wrap(~z)+
  coord_equal()+
  xlim(c(-10,10))+ylim(c(-10,10))+
  theme_bw(10)
3D Ellipse

Anisotropy applies to these samples as well but it only gives us information about the shortest and the longest dimensions. Is that enough?

Pancakeness

Each of these images has the exact same anisotropy because the shortest semiaxis length remains

# Ellipsoids with varying second axis b but identical anisotropy
# (shortest and longest axes fixed at 3 and 10)
xg<-seq(-10,10,length.out=41)
b.vals<-seq(3,10,length.out=4)
cloud(z~x*y | as.factor(b), data=ldply(b.vals,function(b) 
  cbind(subset(make.ellipse.3d(b,10,3,xg),val),b=b)),
  xlim=c(-10,10),ylim=c(-10,10),zlim=c(-10,10))
Oblateness


If we take a slice through the image, we can imagine the final shape looking like a very small thin rod in the case where the second dimension is equal to 3 (short in two directions and long in the other) and a pancake (short in one direction and long in the other two).

# Central z slice of each ellipsoid, colored by second-axis length
ggplot(ldply(rev(b.vals),function(b) 
  cbind(subset(make.ellipse.3d(b,10,3,xg),val & z==0),b=b)),
  aes(x=x,y=y))+
  geom_tile(aes(fill=as.factor(round(b*10)/10)),alpha=1)+
  facet_wrap(~z)+
  coord_equal()+
  xlim(c(-10,10))+ylim(c(-10,10))+
  labs(title="Profile Cut-through of Ellipse",fill="Second\nDimension\nLength")+
  theme_bw(20)
Oblateness Slices

Oblateness

We can thus introduce a new metric for assessing the second-degree anisotropy in the object and this we shall somewhat more formally call Oblateness.

\[ \textrm{Ob} = 2\frac{\lambda_{2}-\lambda_{1}}{\lambda_{3}-\lambda_{1}}-1 \]

The value like in anisotropy is bound between -1 and 1.


# Repeat of the central-slice figure alongside the oblateness formula
ggplot(ldply(rev(b.vals),function(b) 
  cbind(subset(make.ellipse.3d(b,10,3,xg),val & z==0),b=b)),
  aes(x=x,y=y))+
  geom_tile(aes(fill=as.factor(round(b*10)/10)),alpha=1)+
  facet_wrap(~z)+
  coord_equal()+
  xlim(c(-10,10))+ylim(c(-10,10))+
  labs(title="Profile Cut-through of Ellipse",fill="Second\nDimension\nLength")+
  theme_bw(20)
Oblateness Slices

# Oblateness (Ob = 2*(b-min)/(max-min) - 1) as a function of the
# second dimension length, for axes fixed at 3 and 10
ob.vals<-data.frame(b=b.vals,obl=2*(b.vals-3)/(10-3)-1
                    )
ggplot(ob.vals,aes(x=b,y=obl))+
  geom_line()+
  labs(x="Second Dimension Length",y="Oblateness")+
  theme_bw(20)
Oblateness Slices

Interfaces / Surfaces

Many physical and chemical processes occur at surfaces and interfaces, and consequently these structures are important in material science and biology. For this lecture, surface and interface will be used interchangeably and refer to the boundary between two different materials (calcified bone and soft tissue, magma and water, liquid and gas). Through segmentation we have identified the unique phases in the sample under investigation.

Meshing

Constructing a mesh for an image provides very different information than the image data itself. Most crucially this comes when looking at physical processes like deformation.

While the images are helpful for visualizing we rarely have models for quantifying how difficult it is to turn a pixel off

# Pixel-based before/after deformation example: a square stretched
# into a wide rectangle, shown side by side
base.grid<-expand.grid(x=c(-10:10),y=c(-10:10))
base.shape.undef<-cbind(base.grid,val=with(base.grid,abs(x)<6 & abs(y)<6))
def.grid<-rbind(
  cbind(base.shape.undef,type="Before"),
  cbind(base.grid,val=with(base.grid,abs(x)<10 & abs(y)<4),type="After")
  )
ggplot(def.grid,
       aes(x=x,y=y,fill=ifelse(val,"on","off")))+
  geom_tile(alpha=0.80,color="black")+
  coord_equal()+facet_wrap(~type)+
  xlim(c(-10,10))+ylim(c(-10,10))+
  labs(title="Pixel-based Deformation",fill="Value")+
  theme_bw(20)
Deformation in Pixels


If the image is turned into a mesh we now have a list of vertices and edges. For these vertices and edges we can define forces. For example when looking at stress-strain relationships in mechanics using Hooke's Model \[ \vec{F}=k (\vec{x}-\vec{x_0}) \] the force needed to stretch one of these edges is proportional to how far it is stretched.

# Convert a pixel data frame into mesh vertices (four corner points
# per pixel) and, optionally, the edge segments joining neighboring
# vertices.
#
# in.df    : pixel data frame with x and y columns (extra columns kept)
# d.size.x : half-width of a pixel (corner offset in x)
# d.size.y : half-height of a pixel (corner offset in y)
# make.seg : if TRUE return segments (xend/yend/seg.len columns added),
#            otherwise just the corner vertices
df.to.mesh<-function(in.df,d.size.x=0.5,d.size.y=0.5,make.seg=FALSE) {
  corner.pts<-expand.grid(dx=c(-d.size.x,d.size.x),dy=c(-d.size.y,d.size.y))
  min.x<-min(in.df$x)
  max.x<-max(in.df$x)
  min.y<-min(in.df$y)
  max.y<-max(in.df$y)
  # emit the four corner vertices of every pixel, carrying along any
  # non-coordinate columns
  mesh.pts<-ddply(in.df,.(x,y),function(cur.df) {
    xy.cols<-c("x","y")
    pos.data<-cur.df[1,xy.cols]
    n.data<-cur.df[,!(names(cur.df) %in% xy.cols)]
    ddply(corner.pts,.(dx,dy),function(c.offset) {
      cbind(x=pos.data$x+c.offset$dx[1],y=pos.data$y+c.offset$dy[1],n.data)
      })
    })
  if(make.seg) {
    # offsets to 4-connected neighbors: exactly one of dx, dy is zero
    grid.pts<-expand.grid(dx=c(-d.size.x,0,d.size.x),dy=c(-d.size.y,0,d.size.y))
    grid.pts$seg.len<-with(grid.pts,sqrt(dx^2+dy^2))
    # NOTE(review): the filtering condition below was corrupted in the
    # original source ("xendmin.x & yendmin.y" references undefined
    # names); reconstructed here as "keep segments whose far endpoint
    # stays within the pixel-center bounds" -- confirm against the
    # original intent.
    subset(
      ddply(subset(grid.pts,((abs(dx)==0) + (abs(dy)==0))==1),.(dx,dy,seg.len),
            function(c.offset) {
      cbind(xend=mesh.pts$x+c.offset$dx[1],yend=mesh.pts$y+c.offset$dy[1],
            mesh.pts)
      }),
      xend>=min.x & xend<=max.x & yend>=min.y & yend<=max.y)
    } else {
      mesh.pts
    }
  }
# Mesh the undeformed and deformed shapes and color each edge by its
# stretch relative to the resting length of 0.5
undef.mesh<-df.to.mesh(subset(base.shape.undef,val),make.seg=T)
base.shape.def<-base.shape.undef
base.shape.def$x<-base.shape.def$x*10/6
base.shape.def$y<-base.shape.def$y*4/6
def.mesh<-df.to.mesh(subset(base.shape.def,val),
                       d.size.x=0.5*10/6,d.size.y=0.5*4/6,make.seg=T)

ggplot(rbind(cbind(undef.mesh,type="Before"),
             cbind(def.mesh,type="After")),
       aes(x=x,y=y))+

  geom_segment(aes(xend=xend,yend=yend,color=sign(seg.len-0.5)),size=2)+
  geom_point(size=2,color="green",pch=20)+
  coord_equal()+facet_wrap(~type)+
  scale_color_gradient2(mid="black",low="red",high="blue",space="Lab")+
  #xlim(c(-10,10))+ylim(c(-10,10))+
  labs(title="Mesh-based Deformation",color="(x-x0)")+
  theme_bw(20)
Deformation with Meshes

Meshing

Since we use voxels to image and identify the volume, we can use the voxels themselves as an approximation for the surface of the structure.

From this we can create a mesh by

A wide variety of methods of which we will only graze the surface (http://en.wikipedia.org/wiki/Image-based_meshing)

Marching Cubes

Why

Voxels are very poor approximations for the surface and are very rough (they are either normal to the x, y, or z axis and nothing between). Because of their inherently orthogonal surface normals, any analysis which utilizes the surface normal to calculate another value (growth, curvature, etc) is going to be very inaccurate at best and very wrong at worst.

How

The image is processed one voxel at a time and the neighborhood (not quite the same as the morphological definition) is checked at every voxel. From this configuration of values, faces are added to the mesh to incorporate the simplest surface which would explain the values.

Marching tetrahedra is for some applications a better suited approach

Next Time on QBI

So while bounding box and ellipse-based models are useful for many object and cells, they do a very poor job with the sample below.

# A cell (label 155) where both bounding-box and elliptical models fail
cur.cell.df<-subset(cell.lab.df,val==155)
cell.pca<-pca.fun(cur.cell.df)
cell.ellipse<-vec.to.ellipse(cell.pca)
ggplot(cur.cell.df,aes(x=x,y=y))+
  geom_tile(color="black",fill="grey")+
  geom_path(data=bbox.fun(cur.cell.df),aes(x=xi,y=yi,color="Bounding\nBox"))+
  geom_path(data=cell.ellipse,aes(color="Ellipse"))+
  labs(title="Single Cell",color="Shape\nAnalysis\nMethod")+
  theme_bw(20)+coord_equal()+guides(fill=F)
Single Cell


Why

What to do?